if ( hypercall.op != __HYPERVISOR_grant_table_op )
return -ENOSYS;
++#ifdef __ia64__
++ ret = HYPERVISOR_grant_table_op(hypercall.arg[0], (void *)hypercall.arg[1], hypercall.arg[2]);
++#else
/* hypercall-invoking asm taken from privcmd.c */
__asm__ __volatile__ (
"pushl %%ebx; pushl %%ecx; pushl %%edx; "
TRAP_INSTR "; "
"popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
: "=a" (ret) : "0" (&hypercall) : "memory" );
++#endif
return ret;
}
BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1));
BUG_ON(setup.status != 0);
++#ifdef __ia64__
++ shared = __va(frames[0] << PAGE_SHIFT);
++ printk("grant table at %p\n", shared);
++#else
for (i = 0; i < NR_GRANT_FRAMES; i++)
set_fixmap(FIX_GNTTAB_END - i, frames[i] << PAGE_SHIFT);
++#endif
return 0;
}
BUG_ON(gnttab_resume());
++#ifndef __ia64__
shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB_END);
++#endif
for (i = NR_RESERVED_ENTRIES; i < NR_GRANT_ENTRIES; i++)
gnttab_list[i] = i + 1;
static unsigned long mmap_vstart;
#define MMAP_PAGES \
(MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#ifdef __ia64__
++/*
++ * On ia64 the mapping region is not one contiguous fixmap range; each
++ * page's virtual address is tracked individually.  The array holds
++ * unsigned long (not void *) and MMAP_VADDR expands to a plain array
++ * element so it is an assignable lvalue -- dispatch_rw_block_io writes
++ * to MMAP_VADDR(idx, i); a cast expression such as (unsigned long)(x)
++ * would not be assignable.
++ */
++static unsigned long pending_vaddrs[MMAP_PAGES];
++#define MMAP_VADDR(_idx, _i) \
++ (pending_vaddrs[((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) + (_i)])
++#else
#define MMAP_VADDR(_req,_seg) \
(mmap_vstart + \
((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
((_seg) * PAGE_SIZE))
++#endif
/*
* Each outstanding request that we've passed to the lower device layers has a
map[i].flags |= GNTMAP_readonly;
}
- BUG_ON(HYPERVISOR_grant_table_op(
- GNTTABOP_map_grant_ref, map, nseg));
+ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+ BUG_ON(ret);
for (i = 0; i < nseg; i++) {
- if (unlikely(map[i].handle < 0)) {
- DPRINTK("invalid buffer -- could not remap it\n");
- fast_flush_area(pending_idx, nseg);
- goto bad_descriptor;
+ if (likely(map[i].handle >= 0)) {
+ pending_handle(pending_idx, i) = map[i].handle;
++#ifdef __ia64__
++ MMAP_VADDR(pending_idx,i) = gnttab_map_vaddr(map[i]);
++#else
+ phys_to_machine_mapping[__pa(MMAP_VADDR(
+ pending_idx, i)) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(map[i].dev_bus_addr>>PAGE_SHIFT);
++#endif
+ fas = req->frame_and_sects[i];
+ seg[i].buf = map[i].dev_bus_addr |
+ (blkif_first_sect(fas) << 9);
+ } else {
+ errors++;
}
-
- phys_to_machine_mapping[__pa(MMAP_VADDR(
- pending_idx, i)) >> PAGE_SHIFT] =
- FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT);
-
- pending_handle(pending_idx, i) = map[i].handle;
}
- for (i = 0; i < nseg; i++) {
- fas = req->frame_and_sects[i];
- seg[i].buf = map[i].dev_bus_addr |
- (blkif_first_sect(fas) << 9);
+ if (errors) {
+ DPRINTK("invalid buffer -- could not remap it\n");
+ fast_flush_area(pending_idx, nseg);
+ goto bad_descriptor;
}
if (vbd_translate(&preq, blkif, operation) != 0) {
{
int i;
struct page *page;
+ int ret;
+
+ for (i = 0; i < MMAP_PAGES; i++)
+ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
+ if (xen_init() < 0)
+ return -ENODEV;
+
blkif_interface_init();
++#ifdef __ia64__
++ {
++ extern unsigned long alloc_empty_foreign_map_page_range(unsigned long pages);
++ int i;
++
++ mmap_vstart = alloc_empty_foreign_map_page_range(MMAP_PAGES);
++ /*
++ * Validate the allocation before using it.  mmap_vstart is an
++ * unsigned long, so compare against 0 rather than NULL.
++ */
++ BUG_ON(mmap_vstart == 0);
++ printk("Allocated mmap_vstart: 0x%lx\n", mmap_vstart);
++ for (i = 0; i < MMAP_PAGES; i++)
++ pending_vaddrs[i] = mmap_vstart + (i << PAGE_SHIFT);
++ }
++#else
page = balloon_alloc_empty_page_range(MMAP_PAGES);
BUG_ON(page == NULL);
mmap_vstart = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
++#endif
pending_cons = 0;
pending_prod = MAX_PENDING_REQS;